lightningdevkit - rust-lightning
2022-03-03 · 5 min read
Github: https://github.com/lightningdevkit/rust-lightning/
ldk-sample node impl #
https://github.com/lightningdevkit/ldk-sample
run:
$ cargo run <bitcoind-rpc-username>:<bitcoind-rpc-password>@<bitcoind-rpc-host>:<bitcoind-rpc-port> <ldk_storage_directory_path> [<ldk-peer-listening-port>] [bitcoin-network] [announced-listen-addr announced-node-name]
deps:
lightning
lightning-block-sync = { features = [ "rpc-client" ] }
lightning-invoice
lightning-net-tokio
lightning-persister
lightning-background-processor
bitcoin
bitcoin-bech32
tokio
looks like the sample assumes some bitcoind
node running locally/accessible via rpc
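for reference, the rpc connection the sample's BitcoindClient wraps is built on lightning-block-sync's rpc-client feature. rough sketch from memory (exact constructor names may be off; base64 is an extra dep the sample pulls in):
use lightning_block_sync::http::HttpEndpoint;
use lightning_block_sync::rpc::RpcClient;

// sketch: build a JSON-RPC client pointed at bitcoind.
// host/port/user/pass come from the CLI args above.
fn connect_bitcoind(host: &str, port: u16, user: &str, pass: &str) -> std::io::Result<RpcClient> {
    let endpoint = HttpEndpoint::for_host(host.to_string()).with_port(port);
    let credentials = base64::encode(format!("{}:{}", user, pass));
    RpcClient::new(&credentials, endpoint)
}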
main #
Simplified and annotated
// primary crate-specific logic
use crate::bitcoind_client::BitcoindClient;
use crate::disk::FilesystemLogger;
// ..
// Is this like the in-memory storage?
// it says "TODO: persist payment info to disk" lol
pub(crate) type PaymentInfoStorage = Arc<Mutex<HashMap<PaymentHash, PaymentInfo>>>;
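// (PaymentInfo in the sample is roughly:
//    struct PaymentInfo { preimage: Option<PaymentPreimage>,
//                         secret: Option<PaymentSecret>,
//                         status: HTLCStatus,
//                         amt_msat: MillisatAmount })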
// what is FilesystemPersister doing here? some extra indexed
// data?
type ChainMonitor = chainmonitor::ChainMonitor<
InMemorySigner,
Arc<dyn Filter + Send + Sync>,
Arc<BitcoindClient>,
Arc<BitcoindClient>,
Arc<FilesystemLogger>,
Arc<FilesystemPersister>,
>;
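// (the last type param is the Persist impl the ChainMonitor uses to write
// each ChannelMonitor back to disk whenever it changes; FilesystemPersister
// implements that. the two BitcoindClient params are the tx broadcaster and
// fee estimator, I think.)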
pub(crate) type PeerManager = SimpleArcPeerManager<
SocketDescriptor,
ChainMonitor,
BitcoindClient,
BitcoindClient,
dyn chain::Access + Send + Sync,
FilesystemLogger,
>;
// ok, this uses ChainMonitor. chanmgr def needs to persist too, but that seems
// to happen separately (the BackgroundProcessor re-persists it), not via the
// ChainMonitor's persister, which is just for the ChannelMonitors.
pub(crate) type ChannelManager = SimpleArcChannelManager<
ChainMonitor,
BitcoindClient,
BitcoindClient,
FilesystemLogger
>;
// what is E? (looks like the event-handler type; see the sketch after handle_ldk_events)
pub(crate) type InvoicePayer<E> = payment::InvoicePayer<
Arc<ChannelManager>,
Router,
Arc<Mutex<Scorer>>,
Arc<FilesystemLogger>,
E,
>;
// this is Lightning Network router?
type Router = DefaultRouter<Arc<NetworkGraph>, Arc<FilesystemLogger>>;
// ..
async fn handle_ldk_events(
channel_manager: Arc<ChannelManager>,
bitcoind_client: Arc<BitcoindClient>,
keys_manager: Arc<KeysManager>,
inbound_payments: PaymentInfoStorage,
outbound_payments: PaymentInfoStorage,
network: Network,
event: &Event,
) {
match event {
// client should gen a funding tx, then call
// chanmgr::funding_transaction_generated. event generated in chanmgr.
FundingGenerationReady {
temp_chan_id,
chan_out_value,
out_script,
user_chan_id, // 0 for inbound
} => {
let raw_tx = bitcoind.create_raw_transaction(outputs).await;
let funded_tx = bitcoind.fund_raw_transaction(raw_tx).await;
let signed_tx =
bitcoind.sign_raw_transaction_with_wallet(funded_tx.hex).await;
let final_tx = encode::deserialize(signed_tx.hex);
chanmgr.funding_transaction_generated(temp_chan_id, final_tx);
}
// got that cash money babyyy
// need to find the payment preimage and feed it to chanmgr::claim_funds
// if we can't find it, hit up chanmgr::fail_htlc_backwards
// note: might get multiple events for a single inbound payment :O
PaymentReceived { payment_hash, purpose, amt, .. } => {
let mut payments = inbound_payments.lock();
let (preimage, secret) = match purpose {
// vvvvvvvv can be None?
PaymentPurpose::Invoice { preimage, secret } => { /*..*/ }
PaymentPurpose::Spontaneous(preimage) => { (preimage, None) }
};
// unwrap: I think this is ok b/c the sample always creates invoices via
// the chanmgr, so it always knows the preimage
let claimed = chanmgr.claim_funds(preimage.unwrap());
let status = if claimed { HTLCStatus::Succeeded } else { HTLCStatus::Failed };
payments.upsert(payment_hash, { status, preimage, secret, amt });
}
// outbound payment succeeded! made its way to the target and we got
// the preimage back!
PaymentSent { preimage, payment_hash, fee_paid, .. } => {
let mut payments = outbound_payments.lock();
if let Some(payment) = payments.get_mut(payment_hash) {
payment.preimage = Some(preimage);
payment.status = Succeeded;
}
}
// outbound payment failed. PaymentPathFailed events provide extra info
// also for each individual path in a MPP. provided when payment no
// longer retryable (either b/c of many timeouts or
// called chanmgr::abandon_payment).
PaymentFailed { hash, .. } => {
if let Some(payment) = outbound_payments.lock().get_mut(hash) {
payment.status = Failed;
}
}
// gen'd when we successfully forward a payment through our node and
// earn a forwarding fee
PaymentForwarded { fee_earned, claim_from_onchain_tx } => {
log(/* .. */);
}
// you should call chanmgr::process_pending_htlc_forwards some time
// in the future
PendingHTLCsForwardable { time_forwardable } => {
let min = time_forwardable.as_millis();
// spawns a task that calls chanmgr::process_pending_htlc_forwards
// after random time in range [min, 5*min]
}
// some of your tx outputs were confirmed on-chain and are now spendable.
// you _must_ store & spend these outputs asap (?).
SpendableOutputs { outputs } => {
let spending_tx = keysmgr.spend_spendable_outputs(outputs, /* .. */);
bitcoind.broadcast_tx(spending_tx);
}
// one of our channels is being closed. e.g., force close, coop close,
// funding tx timeout, d/c'd peer.
ChannelClosed { chanid, reason, user_chanid } => { /* log */ }
// we can abandon the funding tx and recycle inputs for another purpose
DiscardFunding { chanid, tx } => {
// A "real" node should probably "lock" the UTXOs spent in funding
// transactions until the funding transaction either confirms, or
// this event is generated.
}
// a path for an outbound payment succeeded. useful for scoring nodes
// on the path?
PaymentPathSuccessful { .. } => { /* ignore? */ }
// an outbound HTLC failed. probably someone dropped us on the floor.
// might be able to retry with a different route.
// Note: doesn't indicate failure for a MPP payment.
PaymentPathFailed { .. } => { /* ignore? */ }
}
}
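// (answering "what is E?" above: InvoicePayer is generic over lightning's
// EventHandler trait, and there's a blanket impl for any Fn(&Event), so E ends
// up being the anonymous type of the closure wrapping handle_ldk_events.
// rough sketch from memory; this lives inside start_ldk (steps 18-19 of the
// outline below) and the constructor args may not be exact:)
let handle = tokio::runtime::Handle::current();
let event_handler = move |event: &Event| {
    // runs on the BackgroundProcessor's thread, hence block_on is ok
    handle.block_on(handle_ldk_events(
        channel_manager.clone(),
        bitcoind_client.clone(),
        keys_manager.clone(),
        inbound_payments.clone(),
        outbound_payments.clone(),
        network,
        event,
    ));
};
// event_handler's type is the E in InvoicePayer<E>
let invoice_payer = Arc::new(InvoicePayer::new(
    channel_manager.clone(),
    router,
    scorer.clone(),
    logger.clone(),
    event_handler,
    payment::RetryAttempts(5),
));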
async fn start_ldk() {
// 1. create data directory
// 2. connect to bitcoind rpc
// 3. check we're on same network as bitcoind
// 4. init broadcaster interface (uses bitcoind)
// 5. init persistence (FilesystemPersister(dir))
// 6. init chain monitor (uses bitcoind + fs persister)
// 7. init keys manager (uses a 32B seed file to derive other keys; sketch below)
// 8. read channel monitor state from disk (could be empty?)
// 9. init channel manager
// 9.a. if no dir/manager file, then fresh node
// 9.a.1. use latest block hash/height and a fresh chan manager
// 9.b. read chan manager state from fs
// 10. sync chan mons and chan mgr to chain tip (if restarting node)
// 10.a. go through each chan mon and sync
// (via init::synchronize_listeners)
// 11. tell chain mon to watch each chan
// 12. try to read any persisted network graph state
// 13. spawn a task that persists the network graph every 10min (see the interval sketch below)
// (note: not safety critical, can always just re-gossip)
// 14. init peer mgr (needs node secret, ln msg handler uses chan mgr
// and route handler (NetGraphMsgHandler))
// 15. init inbound net
// 15.a. bind to port.
// 15.b. spawn a lightning_net_tokio::setup_inbound(peermgr) task for
// every inbound connection.
// 16. spawn a task that polls the best chain tip every 1s.
// 17. spawn a task that persists the scorer (used for scoring channels
// to find lowest fee path) every 10min.
// 18. init main event handler (top-level app events you should handle)
// 19. init InvoicePayer (utility for paying Invoices and sending pymts)
// this takes the event handler, router, chanmgr, and scorer
// 20. init BackgroundProcessor (this fwds events to the InvoicePayer,
// periodically backs up chanmgr, drives clock on chanmgr and
// and peermgr, periodically clear stale chans in net graph)
// 21. spawn a task that tries to reconnect with channel peers every 1s
// 22. spawn a task that broadcasts our node_announcement every 1min
}
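step 7 in concrete terms, as a sketch (mirrors what I remember the sample doing; the error handling and the rand dep are mine):
use std::convert::TryInto;
use std::time::SystemTime;
use lightning::chain::keysinterface::KeysManager;
use rand::RngCore;

// step 7: load (or create) a 32-byte seed and derive all node keys from it.
// ldk_data_dir is the storage dir passed on the command line.
fn init_keys_manager(ldk_data_dir: &str) -> KeysManager {
    let seed_path = format!("{}/keys_seed", ldk_data_dir);
    let seed: [u8; 32] = match std::fs::read(&seed_path) {
        Ok(bytes) => bytes.try_into().expect("keys_seed must be 32 bytes"),
        Err(_) => {
            // fresh node: generate and persist a new random seed
            let mut seed = [0u8; 32];
            rand::thread_rng().fill_bytes(&mut seed);
            std::fs::write(&seed_path, &seed).expect("failed to write keys_seed");
            seed
        }
    };
    // per the KeysManager docs, the secs/nanos need to be unique across
    // restarts (with the same seed) so newly derived channel keys don't collide
    let now = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap();
    KeysManager::new(&seed, now.as_secs(), now.subsec_nanos())
}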
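steps 13/17/21/22 are all the same shape: spawn a tokio task that does some work on an interval. the sample writes each loop out by hand; a hypothetical helper to show the shape (each job body is the interesting part: serialize the NetworkGraph/Scorer to disk, reconnect to channel peers, re-broadcast the node announcement):
use std::time::Duration;

// hypothetical helper (not in ldk-sample): run `job` every `period`, forever.
fn spawn_periodic<F>(period: Duration, mut job: F)
where
    F: FnMut() + Send + 'static,
{
    tokio::spawn(async move {
        let mut interval = tokio::time::interval(period);
        loop {
            interval.tick().await;
            job();
        }
    });
}

// e.g. step 13: every 10 min, write the network graph to disk
// spawn_periodic(Duration::from_secs(600), move || { /* serialize network_graph */ });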